# CREATE OUR DATASET FROM DEFI LLAMA SOURCE
import pandas as pd
import json
import time
import os

# load json dump of DefiLlama protocols
df = pd.read_json('defillamaprotocols.json')
print(str(len(df)) + " protocols to start with")
# filter df to only include protocols with TVL > 0
df = df[df['tvl'] > 0]
# filter df to remove categories "CEX" and "Chain" (not actual DeFi protocols)
df = df[~df['category'].isin(["CEX", "Chain"])]
print(str(len(df)) + " protocols with TVL > 0")
# Select just the columns we need in a single vectorised step.
# (The previous version concatenated one single-row DataFrame per protocol,
# which is O(n^2) in the number of protocols.)
results = df[['category', 'name', 'forkedFrom', 'tvl', 'chain']].reset_index(drop=True)
# tidy this up as it's inconsistent otherwise: empty fork lists become None
results['forkedFrom'] = results['forkedFrom'].apply(lambda x: x if x != [] else None)
# save results as csv
results.to_csv('protocols_all_small.csv', index=False)
2875 protocols to start with 2368 protocols with TVL > 0
# GET SUMMARY STATS ON FORKS PER CATEGORY
df = pd.read_csv('protocols_all_small.csv')
# for each category, report:
# 1. total TVL
# 2. total TVL in forks
# 3. percentage of TVL in forks
# 4. number of protocols in category
# 5. number of protocols in category that are forks
# 6. percentage of protocols in category that are forks
# 7. average TVL of protocols in category
# 8. average TVL of protocols in category that are forks
summary_columns = ['category', 'total_tvl', 'total_tvl_forks', 'percent_tvl_forks',
                   'num_protocols', 'num_protocols_forks', 'percent_protocols_forks',
                   'avg_tvl', 'avg_tvl_forks']
# Collect one dict per category, then build the DataFrame once at the end
# (concatenating inside the loop is quadratic and re-filters df repeatedly).
summary_rows = []
for category in df['category'].unique():
    # hoist the two subsets: each was previously recomputed up to six times
    cat = df[df['category'] == category]
    cat_forks = cat[cat['forkedFrom'].notna()]
    total_tvl = cat['tvl'].sum()
    total_tvl_forks = cat_forks['tvl'].sum()
    summary_rows.append({
        'category': category,
        'total_tvl': total_tvl,
        'total_tvl_forks': total_tvl_forks,
        'percent_tvl_forks': total_tvl_forks / total_tvl,
        'num_protocols': len(cat),
        'num_protocols_forks': len(cat_forks),
        'percent_protocols_forks': len(cat_forks) / len(cat),
        'avg_tvl': cat['tvl'].mean(),
        # mean of an empty subset is NaN — matches the previous behavior for
        # categories with no forks
        'avg_tvl_forks': cat_forks['tvl'].mean(),
    })
forks_summary = pd.DataFrame(summary_rows, columns=summary_columns)
# save results as csv
forks_summary.to_csv('forks_summary.csv', index=False)
# Find what percentage of tvl of each category is covered by the largest n protocols by tvl and their respective forks
df = pd.read_csv('protocols_all_small.csv')
n = 5
# declare every output column up front (the original list omitted
# 'fork_protocols' and 'total_tvl_fork_protocols', which were then added
# implicitly by concat)
result_columns = ['category', 'largest_protocols', 'percent_tvl_largest_protocols',
                  'percent_tvl_largest_protocols_and_forks', 'total_tvl_largest_protocols',
                  'total_tvl_largest_protocols_and_forks', 'fork_protocols',
                  'total_tvl_fork_protocols']
result_rows = []
for category in df['category'].unique():
    # hoist the category subset and its total TVL — previously recomputed
    # three times per iteration
    cat = df[df['category'] == category]
    cat_tvl = cat['tvl'].sum()
    # find the n largest protocols by TVL
    largest_protocols = cat.nlargest(n, 'tvl')
    largest_names = largest_protocols['name'].tolist()
    # find the total TVL of the n largest protocols
    total_tvl_largest_protocols = largest_protocols['tvl'].sum()
    # get protocols in this category where forkedFrom is not empty and
    # mentions any of the n largest protocols.
    # NOTE(review): forkedFrom comes back from the CSV as a stringified list,
    # so this is substring matching — a protocol name that is a substring of
    # another would match spuriously; confirm this is acceptable.
    forks = cat[cat['forkedFrom'].notna()]
    fork_protocols = forks[forks['forkedFrom'].apply(
        lambda x: any(name in x for name in largest_names))]
    # if fork_protocols is not empty, find the sum of tvl of all protocols in it
    if len(fork_protocols) > 0:
        total_tvl_fork_protocols = fork_protocols['tvl'].sum()
        fork_protocol_names = fork_protocols['name'].tolist()
    else:
        total_tvl_fork_protocols = 0
        fork_protocol_names = []
    # percentage of TVL in this category that is in the n largest protocols,
    # with and without their forks
    result_rows.append({
        'category': category,
        'largest_protocols': largest_names,
        'percent_tvl_largest_protocols': total_tvl_largest_protocols / cat_tvl,
        'percent_tvl_largest_protocols_and_forks':
            (total_tvl_largest_protocols + total_tvl_fork_protocols) / cat_tvl,
        'total_tvl_largest_protocols': total_tvl_largest_protocols,
        'total_tvl_largest_protocols_and_forks':
            total_tvl_largest_protocols + total_tvl_fork_protocols,
        'fork_protocols': fork_protocol_names,
        'total_tvl_fork_protocols': total_tvl_fork_protocols,
    })
results = pd.DataFrame(result_rows, columns=result_columns)
# save results as csv
results.to_csv('top5.csv', index=False)
# create a stacked barchart with plotly, x-axis is category, y-axis is percent_tvl_largest_protocols, percent_tvl_largest_protocols_and_forks
# For all "original" protocols, find the sum of their TVL and their forks.
df = pd.read_csv('protocols_all_small.csv')
# output columns, built once at the end instead of concatenating per row
result_columns = ['category', 'protocol_name', 'num_forks', 'total_tvl',
                  'tvl_orig', 'tvl_forks', 'forks_names']
result_rows = []
# iterate over each category
for category in df['category'].unique():
    cat = df[df['category'] == category]
    # filter the protocols in the category where forkedFrom is empty
    orig_protocols = cat[cat['forkedFrom'].isnull()]
    # fork candidates for this category — loop-invariant, so hoisted out of
    # the per-protocol loop (previously recomputed for every protocol)
    cat_forks = cat[cat['forkedFrom'].notna()]
    # find the sum of TVL for each orig_protocol and all fork_protocols that
    # have its name in forkedFrom
    for index, protocol in orig_protocols.iterrows():
        name = protocol['name']
        # NOTE(review): forkedFrom is a stringified list in the CSV, so this
        # is substring matching — confirm names can't shadow each other.
        forks = cat_forks[cat_forks['forkedFrom'].apply(lambda x: name in x)]
        # if forks is not empty, find the sum of tvl of all protocols in forks
        if len(forks) > 0:
            total_tvl_forks = forks['tvl'].sum()
            forks_names = forks['name'].tolist()
        else:
            total_tvl_forks = 0
            forks_names = []
        # total TVL of the original protocol plus all of its forks
        result_rows.append({
            'category': category,
            'protocol_name': name,
            'num_forks': len(forks),
            'total_tvl': protocol['tvl'] + total_tvl_forks,
            'tvl_orig': protocol['tvl'],
            'tvl_forks': total_tvl_forks,
            'forks_names': forks_names,
        })
results = pd.DataFrame(result_rows, columns=result_columns)
# save to csv
results.to_csv('forks_by_protocol.csv', index=False)
# pretty print results, all tvl values in millions of $
# (removed a dead `pd.read_csv('forks_by_protocol.csv')` that was immediately
# overwritten by the read below)

def _as_percent(x):
    """Format a 0-1 ratio as a percentage string, e.g. 0.0465 -> '4.65%'."""
    return str(round(x * 100, 2)) + "%"

def _as_millions(x):
    """Format a dollar amount as millions with 2 dp, e.g. 19559060000 -> '$19559.06M'."""
    return "$" + str(round(x / 1000000, 2)) + "M"

df = pd.read_csv('forks_summary.csv')
for col in ['percent_tvl_forks', 'percent_protocols_forks']:
    df[col] = df[col].apply(_as_percent)
# format total_tvl, total_tvl_forks, avg_tvl and avg_tvl_forks as millions of $
for col in ['total_tvl', 'total_tvl_forks', 'avg_tvl', 'avg_tvl_forks']:
    df[col] = df[col].apply(_as_millions)
df
| category | total_tvl | total_tvl_forks | percent_tvl_forks | num_protocols | num_protocols_forks | percent_protocols_forks | avg_tvl | avg_tvl_forks | |
|---|---|---|---|---|---|---|---|---|---|
| 0 | Liquid Staking | $19559.06M | $109.39M | 0.56% | 86 | 4 | 4.65% | $227.43M | $27.35M |
| 1 | CDP | $9311.14M | $143.9M | 1.55% | 89 | 26 | 29.21% | $104.62M | $5.53M |
| 2 | Bridge | $10431.25M | $190.93M | 1.83% | 41 | 2 | 4.88% | $254.42M | $95.47M |
| 3 | Dexes | $16444.37M | $4120.06M | 25.05% | 818 | 501 | 61.25% | $20.1M | $8.22M |
| 4 | Lending | $13951.75M | $1768.94M | 12.68% | 237 | 106 | 44.73% | $58.87M | $16.69M |
| 5 | Yield | $5044.37M | $38.53M | 0.76% | 351 | 28 | 7.98% | $14.37M | $1.38M |
| 6 | Services | $2348.45M | $0.0M | 0.0% | 48 | 1 | 2.08% | $48.93M | $0.0M |
| 7 | Derivatives | $1348.02M | $41.08M | 3.05% | 108 | 32 | 29.63% | $12.48M | $1.28M |
| 8 | Yield Aggregator | $1226.31M | $29.62M | 2.42% | 90 | 28 | 31.11% | $13.63M | $1.06M |
| 9 | Cross Chain | $659.35M | $87.15M | 13.22% | 20 | 1 | 5.0% | $32.97M | $87.15M |
| 10 | Synthetics | $605.31M | $8.66M | 1.43% | 30 | 4 | 13.33% | $20.18M | $2.17M |
| 11 | Privacy | $310.62M | $0.08M | 0.02% | 12 | 3 | 25.0% | $25.89M | $0.03M |
| 12 | Insurance | $335.0M | $0.49M | 0.15% | 21 | 1 | 4.76% | $15.95M | $0.49M |
| 13 | Indexes | $582.05M | $0.25M | 0.04% | 42 | 3 | 7.14% | $13.86M | $0.08M |
| 14 | Launchpad | $623.03M | $0.44M | 0.07% | 19 | 1 | 5.26% | $32.79M | $0.44M |
| 15 | Liquidity manager | $368.69M | $1.12M | 0.3% | 22 | 1 | 4.55% | $16.76M | $1.12M |
| 16 | NFT Lending | $365.46M | $0.77M | 0.21% | 21 | 1 | 4.76% | $17.4M | $0.77M |
| 17 | Payments | $208.07M | $0.0M | 0.0% | 10 | 0 | 0.0% | $20.81M | $nanM |
| 18 | Algo-Stables | $265.17M | $0.7M | 0.26% | 44 | 19 | 43.18% | $6.03M | $0.04M |
| 19 | NFT Marketplace | $150.51M | $0.0M | 0.0% | 14 | 0 | 0.0% | $10.75M | $nanM |
| 20 | RWA | $431.87M | $0.0M | 0.0% | 17 | 0 | 0.0% | $25.4M | $nanM |
| 21 | Leveraged Farming | $193.39M | $6.4M | 3.31% | 13 | 1 | 7.69% | $14.88M | $6.4M |
| 22 | Staking Pool | $53.89M | $0.0M | 0.0% | 10 | 0 | 0.0% | $5.39M | $nanM |
| 23 | Options Vault | $78.91M | $0.23M | 0.29% | 15 | 1 | 6.67% | $5.26M | $0.23M |
| 24 | Options | $123.17M | $0.05M | 0.04% | 36 | 1 | 2.78% | $3.42M | $0.05M |
| 25 | Reserve Currency | $6.74M | $0.01M | 0.09% | 52 | 43 | 82.69% | $0.13M | $0.0M |
| 26 | Uncollateralized Lending | $12.38M | $0.2M | 1.61% | 7 | 1 | 14.29% | $1.77M | $0.2M |
| 27 | Prediction Market | $21.6M | $0.0M | 0.0% | 20 | 0 | 0.0% | $1.08M | $nanM |
| 28 | Farm | $20.66M | $5.47M | 26.46% | 62 | 30 | 48.39% | $0.33M | $0.18M |
| 29 | RWA Lending | $2.39M | $0.0M | 0.0% | 3 | 0 | 0.0% | $0.8M | $nanM |
| 30 | Gaming | $2.08M | $0.0M | 0.0% | 9 | 0 | 0.0% | $0.23M | $nanM |
| 31 | Oracle | $0.0M | $0.0M | 0.0% | 1 | 0 | 0.0% | $0.0M | $nanM |
If we were to implement BPs for the top 5 protocols in each category, what percentage of the total TVL within the category would we cover?
df = pd.read_csv('top5.csv')

def _pct_str(x):
    """Format a 0-1 ratio as a percentage string, e.g. 0.9147 -> '91.47%'."""
    return str(round(x * 100, 2)) + "%"

def _usd_millions(x):
    """Format a dollar amount as millions with 2 dp, e.g. 95500000 -> '$95.5M'."""
    return "$" + str(round(x / 1000000, 2)) + "M"

# apply the shared formatters instead of five near-identical inline lambdas
for col in ['percent_tvl_largest_protocols', 'percent_tvl_largest_protocols_and_forks']:
    df[col] = df[col].apply(_pct_str)
for col in ['total_tvl_largest_protocols', 'total_tvl_largest_protocols_and_forks',
            'total_tvl_fork_protocols']:
    df[col] = df[col].apply(_usd_millions)
df
| category | largest_protocols | percent_tvl_largest_protocols | percent_tvl_largest_protocols_and_forks | total_tvl_largest_protocols | total_tvl_largest_protocols_and_forks | fork_protocols | total_tvl_fork_protocols | |
|---|---|---|---|---|---|---|---|---|
| 0 | Liquid Staking | ['Lido', 'Coinbase Wrapped Staked ETH', 'Rocke... | 91.47% | 91.96% | $17890.06M | $17985.56M | ['OKC Liquid Staking'] | $95.5M |
| 1 | CDP | ['MakerDAO', 'JustStables', 'Liquity', 'Abraca... | 92.68% | 94.0% | $8629.27M | $8752.71M | ['Helio Protocol', 'Gravita Protocol', 'DeFi F... | $123.44M |
| 2 | Bridge | ['WBTC', 'JustCryptos', 'Multichain', 'Portal'... | 90.24% | 90.24% | $9412.66M | $9412.66M | [] | $0.0M |
| 3 | Dexes | ['Curve DEX', 'Uniswap V3', 'PancakeSwap AMM',... | 63.0% | 83.85% | $10359.96M | $13788.75M | ['PancakeSwap AMM', 'SushiSwap', 'PancakeSwap ... | $3428.79M |
| 4 | Lending | ['AAVE V2', 'JustLend', 'AAVE V3', 'Compound',... | 78.13% | 87.39% | $10900.66M | $12192.63M | ['Venus', 'Benqi Lending', 'Tectonic', 'Flux F... | $1291.97M |
| 5 | Yield | ['Convex Finance', 'Aura', 'Coinwind', 'Conic ... | 81.53% | 82.2% | $4112.8M | $4146.72M | ['Wombex Finance', 'Quoll', 'Muuu Finance'] | $33.92M |
| 6 | Services | ['Instadapp', 'DefiSaver', 'CIAN', 'SPL Govern... | 98.12% | 98.12% | $2304.29M | $2304.29M | [] | $0.0M |
| 7 | Derivatives | ['GMX', 'dYdX', 'Gains Network', 'ApolloX', 'M... | 78.89% | 81.69% | $1063.46M | $1101.2M | ['Mummy Finance', 'QuickPerps', 'Alpaca Perpet... | $37.74M |
| 8 | Yield Aggregator | ['Yearn Finance', 'Beefy', 'Flamincome', 'Kava... | 73.64% | 74.82% | $903.04M | $917.52M | ['Autofarm', 'Vaporwave', 'Epsylon Finance', '... | $14.48M |
| 9 | Cross Chain | ['Stargate', 'Hop Protocol', 'Across', 'pNetwo... | 93.88% | 93.88% | $618.98M | $618.98M | [] | $0.0M |
| 10 | Synthetics | ['Synthetix', 'DefiChain Loans', 'Alchemix', '... | 96.94% | 97.11% | $586.81M | $587.8M | ['Scientix'] | $0.99M |
| 11 | Privacy | ['Tornado Cash', 'Aztec', 'Railgun', 'Sienna N... | 99.97% | 99.99% | $310.52M | $310.6M | ['ShadeCash', 'Typhoon Cash', 'Tonic Cash'] | $0.08M |
| 12 | Insurance | ['Nexus Mutual', 'Unslashed', 'Ease.org', 'Gua... | 95.91% | 95.91% | $321.29M | $321.29M | [] | $0.0M |
| 13 | Indexes | ['Bwatch', 'Set Protocol', 'Enzyme Finance', '... | 87.19% | 87.23% | $507.52M | $507.74M | ['Arch Ethereum Web3'] | $0.22M |
| 14 | Launchpad | ['UniCrypt', 'PinkSale', 'Team Finance', 'DxSa... | 97.34% | 97.34% | $606.45M | $606.45M | [] | $0.0M |
| 15 | Liquidity manager | ['Arrakis V1', 'Gamma', 'Bunni', 'DefiEdge', '... | 90.83% | 90.83% | $334.89M | $334.89M | [] | $0.0M |
| 16 | NFT Lending | ['BendDAO Lending', 'ParaSpace Lending', 'Blur... | 95.77% | 95.77% | $350.0M | $350.0M | [] | $0.0M |
| 17 | Payments | ['Lightning Network', 'Flexa', 'Sablier Financ... | 99.02% | 99.02% | $206.03M | $206.03M | [] | $0.0M |
| 18 | Algo-Stables | ['Frax', 'Mento', 'Frax FPI', 'UXD', 'Sigmausd'] | 97.37% | 97.39% | $258.21M | $258.25M | ['XUSD Money', 'Blindex'] | $0.04M |
| 19 | NFT Marketplace | ['Blur Bids', 'NFTX', 'SudoSwap', 'Caviar V1',... | 99.5% | 99.5% | $149.75M | $149.75M | [] | $0.0M |
| 20 | RWA | ['Ondo Finance', 'RealT Tokens', 'MatrixDock',... | 82.3% | 82.3% | $355.41M | $355.41M | [] | $0.0M |
| 21 | Leveraged Farming | ['Alpaca Leveraged Yield Farming', 'Gearbox', ... | 95.78% | 95.78% | $185.24M | $185.24M | [] | $0.0M |
| 22 | Staking Pool | ['WEMIX.FI Staking', 'ABC Pool', 'UniFi Stakin... | 93.77% | 93.77% | $50.53M | $50.53M | [] | $0.0M |
| 23 | Options Vault | ['Jones DAO', 'Ribbon', 'Thetanuts Finance', '... | 92.68% | 92.97% | $73.14M | $73.37M | ['Bonsai Strike'] | $0.23M |
| 24 | Options | ['Lyra', 'Dopex', 'Opyn Gamma', 'Hegic', 'Prem... | 72.2% | 72.2% | $88.93M | $88.93M | [] | $0.0M |
| 25 | Reserve Currency | ['The Idols', 'Pluto', 'Dogewhale', 'TempleDAO... | 99.44% | 99.44% | $6.71M | $6.71M | [] | $0.0M |
| 26 | Uncollateralized Lending | ['Maple', 'Clearpool', 'TrueFi', 'Union Protoc... | 97.38% | 97.38% | $12.05M | $12.05M | [] | $0.0M |
| 27 | Prediction Market | ['Polymarket', 'Gnosis Protocol v1', 'Azuro', ... | 84.44% | 84.44% | $18.24M | $18.24M | [] | $0.0M |
| 28 | Farm | ['VyFinance Vaults', 'Proteo Defi', 'GroveCoin... | 66.22% | 66.22% | $13.68M | $13.68M | [] | $0.0M |
| 29 | RWA Lending | ['Goldfinch', 'Centrifuge', 'Credix'] | 100.0% | 100.0% | $2.39M | $2.39M | [] | $0.0M |
| 30 | Gaming | ['AstarFarm', 'Aavegotchi', 'Luchadores', 'Min... | 99.97% | 99.97% | $2.08M | $2.08M | [] | $0.0M |
| 31 | Oracle | ['Nest Protocol Staking'] | 100.0% | 100.0% | $0.0M | $0.0M | [] | $0.0M |
Here we ask the question: how many unique protocols would we need to support in order to achieve 80%, 90%, or 95% coverage per category?
Each bar shows a unique protocol, split into the original (blue) and its forks (orange).
The green, orange, red vertical lines show the intervals of 80%, 90%, 95% respectively.
N.B. we have filtered down to show only the following categories here: 'Dexes', 'Lending', 'Yield', 'Derivatives', 'CDP', 'Yield Aggregator'
import plotly.express as px
import plotly.io as pio
pio.renderers.default='notebook'
# read in the data
df = pd.read_csv('forks_by_protocol.csv')
all_protocols = pd.read_csv('protocols_all_small.csv')
# Filter to just the most interesting categories
df = df[df['category'].isin(['Dexes', 'Lending', 'Yield', 'Derivatives', 'CDP', 'Yield Aggregator', 'Options', 'Options Vault', 'Liquid Staking','Bridge', 'Synthetics'])]
# iterate over each category
for category in df['category'].unique():
    # filter the data for the category; .copy() so the cumulative_tvl column
    # assignment below doesn't warn about writing to a slice
    category_data = df[df['category'] == category].copy()
    # sort the data by total TVL in descending order
    category_data = category_data.sort_values(by='total_tvl', ascending=False)
    # calculate the cumulative sum of total TVL
    category_data['cumulative_tvl'] = category_data['total_tvl'].cumsum()
    # hoist the category total — previously recomputed for every threshold
    category_total = category_data['total_tvl'].sum()
    # filter the protocols that cover up to 95% of total TVL
    top_protocols = category_data[category_data['cumulative_tvl'] <= 0.95 * category_total]
    # calculate the TVL of the "other" category (everything past the 95% line)
    other_tvl = category_data[category_data['cumulative_tvl'] > 0.95 * category_total]['total_tvl'].sum()
    # add the top protocols plus an aggregate "other" bar to the results.
    # NOTE(review): the "other" row has no tvl_orig/tvl_forks/cumulative_tvl,
    # so it plots as an empty bar and is excluded from the threshold counts
    # below — confirm that is intended.
    results = pd.concat([top_protocols, pd.DataFrame({'protocol_name': ['other'], 'total_tvl': [other_tvl]})])
    # create a bar chart for the category, stacked bar for each protocol, stacks for tvl_orig and tvl_forks
    fig = px.bar(results, x='protocol_name', y=['tvl_orig', 'tvl_forks'], title=category, barmode='stack')
    # how many bars until we get past 80% / 90% / 95% of total TVL
    results_total = results['total_tvl'].sum()
    num_bars_80 = len(results[results['cumulative_tvl'] <= 0.8 * results_total])
    num_bars_90 = len(results[results['cumulative_tvl'] <= 0.9 * results_total])
    num_bars_95 = len(results[results['cumulative_tvl'] <= 0.95 * results_total])
    # add a vertical line to the chart to show the 80, 90, 95% thresholds
    fig.add_vline(x=num_bars_80 - 0.5, line_width=3, line_dash="dash", line_color="green")
    fig.add_vline(x=num_bars_90 - 0.5, line_width=3, line_dash="dash", line_color="orange")
    fig.add_vline(x=num_bars_95 - 0.5, line_width=3, line_dash="dash", line_color="red")
    # show the chart
    fig.show()
    # sanity checks
    #print ('results_tvl =' + str(results['total_tvl'].sum()/1000000))
    #print("all protocols tvl = " + str(all_protocols[all_protocols['category'] == category]['tvl'].sum()/1000000))
    # print summary stats
    print(str(len(results)) + " total protocols in this category")
    print('{:.2f}M total TVL in this category'.format(all_protocols[all_protocols['category'] == category]['tvl'].sum()/1000000))
    print(str(num_bars_80) + " protocols cover 80% of total TVL")
    print(str(num_bars_90) + " protocols cover 90% of total TVL")
    print(str(num_bars_95) + " protocols cover 95% of total TVL")
    print("\n\n")
10 total protocols in this category 19559.06M total TVL in this category 2 protocols cover 80% of total TVL 3 protocols cover 90% of total TVL 9 protocols cover 95% of total TVL
7 total protocols in this category 9311.14M total TVL in this category 1 protocols cover 80% of total TVL 3 protocols cover 90% of total TVL 6 protocols cover 95% of total TVL
7 total protocols in this category 10431.25M total TVL in this category 2 protocols cover 80% of total TVL 4 protocols cover 90% of total TVL 6 protocols cover 95% of total TVL
33 total protocols in this category 16444.37M total TVL in this category 5 protocols cover 80% of total TVL 16 protocols cover 90% of total TVL 32 protocols cover 95% of total TVL
10 total protocols in this category 13951.75M total TVL in this category 3 protocols cover 80% of total TVL 5 protocols cover 90% of total TVL 9 protocols cover 95% of total TVL
26 total protocols in this category 5044.37M total TVL in this category 3 protocols cover 80% of total TVL 12 protocols cover 90% of total TVL 25 protocols cover 95% of total TVL
15 total protocols in this category 1348.02M total TVL in this category 4 protocols cover 80% of total TVL 9 protocols cover 90% of total TVL 14 protocols cover 95% of total TVL
17 total protocols in this category 1226.31M total TVL in this category 6 protocols cover 80% of total TVL 11 protocols cover 90% of total TVL 16 protocols cover 95% of total TVL
4 total protocols in this category 605.31M total TVL in this category 1 protocols cover 80% of total TVL 2 protocols cover 90% of total TVL 3 protocols cover 95% of total TVL
6 total protocols in this category 78.91M total TVL in this category 2 protocols cover 80% of total TVL 3 protocols cover 90% of total TVL 5 protocols cover 95% of total TVL
16 total protocols in this category 123.17M total TVL in this category 7 protocols cover 80% of total TVL 11 protocols cover 90% of total TVL 15 protocols cover 95% of total TVL